#include <asm-xen/xen_proc.h>
#include <asm-xen/linux-public/privcmd.h>
#include <asm-xen/gnttab.h>
+#include <asm/synch_bitops.h>
#if 1
#define ASSERT(_p) \
if ( (flags = nflags) & (GTF_reading|GTF_writing) )
printk(KERN_ALERT "WARNING: g.e. still in use!\n");
}
- while ( (nflags = cmpxchg(&shared[ref].flags, flags, 0)) != flags );
+ while ( (nflags = synch_cmpxchg(&shared[ref].flags, flags, 0)) != flags );
put_free_entry(ref);
}
* Otherwise invalidate the grant entry against future use.
*/
if ( likely(flags != GTF_accept_transfer) ||
- (cmpxchg(&shared[ref].flags, flags, 0) != GTF_accept_transfer) )
+ (synch_cmpxchg(&shared[ref].flags, flags, 0) != GTF_accept_transfer) )
while ( unlikely((frame = shared[ref].frame) == 0) )
cpu_relax();
return oldbit;
}
+/*
+ * Dummy wide type used in the "m" asm constraints below: casting ptr to
+ * (struct __synch_xchg_dummy *) tells GCC the asm may touch a large
+ * object at that address, so it will not cache the pointed-to value in
+ * a register across the asm.
+ */
+struct __synch_xchg_dummy { unsigned long a[100]; };
+#define __synch_xg(x) ((struct __synch_xchg_dummy *)(x))
+
+/*
+ * Type-generic compare-and-exchange: dispatches on sizeof(*ptr) to the
+ * 1/2/4-byte cases of __synch_cmpxchg() and casts the result back to
+ * the pointed-to type.  Unlike plain cmpxchg(), the underlying asm uses
+ * an unconditional `lock' prefix -- presumably because the operand is
+ * shared with the hypervisor and must be atomic even on UP kernels;
+ * NOTE(review): confirm against the grant-table callers above.
+ */
+#define synch_cmpxchg(ptr, old, new) \
+((__typeof__(*(ptr)))__synch_cmpxchg((ptr),\
+ (unsigned long)(old), \
+ (unsigned long)(new), \
+ sizeof(*(ptr))))
+
+/*
+ * Atomically: if (*ptr == old) *ptr = new;  returns the value that was
+ * in *ptr beforehand (equal to `old' iff the exchange happened).
+ *
+ * The memory operand is passed as an input with a "memory" clobber,
+ * which is what forces GCC to treat the store through `ptr' correctly.
+ * The `lock' prefix is hard-coded (not LOCK_PREFIX), so the operation
+ * is locked even on UP builds -- see note at synch_cmpxchg().
+ */
+static inline unsigned long __synch_cmpxchg(volatile void *ptr,
+ unsigned long old,
+ unsigned long new, int size)
+{
+ unsigned long prev;
+ switch (size) {
+ case 1:
+ /* cmpxchgb needs a byte-addressable register, hence "q". */
+ __asm__ __volatile__("lock; cmpxchgb %b1,%2"
+ : "=a"(prev)
+ : "q"(new), "m"(*__synch_xg(ptr)),
+ "0"(old)
+ : "memory");
+ return prev;
+ case 2:
+ /* NOTE(review): "q" (a/b/c/d regs) is stricter than needed for
+ * the 16-bit case; "r" would suffice, as in mainline cmpxchg. */
+ __asm__ __volatile__("lock; cmpxchgw %w1,%2"
+ : "=a"(prev)
+ : "q"(new), "m"(*__synch_xg(ptr)),
+ "0"(old)
+ : "memory");
+ return prev;
+ case 4:
+ /* NOTE(review): likewise "r" would suffice here. */
+ __asm__ __volatile__("lock; cmpxchgl %1,%2"
+ : "=a"(prev)
+ : "q"(new), "m"(*__synch_xg(ptr)),
+ "0"(old)
+ : "memory");
+ return prev;
+ }
+ /* Unsupported size: no exchange is performed, yet returning `old'
+ * makes it look like success.  NOTE(review): callers only use sizes
+ * 1/2/4 via the macro, but a link-time error (as mainline cmpxchg
+ * does) would be safer. */
+ return old;
+}
+
static __inline__ int synch_const_test_bit(int nr, const volatile void * addr)
{
return ((1UL << (nr & 31)) &